Everywhere currently passes "sy" (i.e. full system scope), so there is no actual change in behaviour.
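
For illustration only (not part of this change), the parameterised macro pastes the
scope token into the instruction via the preprocessor stringizing operator, so:

    dsb(sy);   /* expands to: asm volatile("dsb sy"  : : : "memory") */
    dsb(ish);  /* would expand to: asm volatile("dsb ish" : : : "memory");
                * "ish" is shown purely as an example of a narrower scope,
                * this patch only ever passes "sy". */
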
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
local_irq_disable();
if ( cpu_is_haltable(smp_processor_id()) )
{
- dsb();
+ dsb(sy);
wfi();
}
local_irq_enable();
spin_lock_irqsave(&desc->lock, flags);
spin_lock(&gic.lock);
desc->status &= ~IRQ_DISABLED;
- dsb();
+ dsb(sy);
/* Enable routing */
GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
spin_unlock(&gic.lock);
cpumask_and(&online_mask, cpumask, &cpu_online_map);
mask = gic_cpu_mask(&online_mask);
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
| (mask<<GICD_SGI_TARGET_SHIFT)
{
ASSERT(sgi < 16); /* There are only 16 SGIs */
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
| sgi;
{
ASSERT(sgi < 16); /* There are only 16 SGIs */
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
| sgi;
return -EBUSY;
desc->action = new;
- dsb();
+ dsb(sy);
return 0;
}
/* wait for complete flag to be set */
do {
stat = syscfg[V2M_SYS_CFGSTAT/4];
- dsb();
+ dsb(sy);
} while ( !(stat & V2M_SYS_CFG_COMPLETE) );
/* check error status and return error flag if set */
/* switch to slow mode */
writel(0x3, sp810);
- dsb(); isb();
+ dsb(sy); isb();
/* writing any value to SCSYSSTAT reg will reset the system */
writel(0x1, sp810 + 4);
- dsb(); isb();
+ dsb(sy); isb();
iounmap(sp810);
}
local_irq_disable();
cpu_is_dead = 1;
/* Make sure the write happens before we sleep forever */
- dsb();
+ dsb(sy);
isb();
while ( 1 )
wfi();
s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs;
while ( get_s_time() - deadline < 0 )
;
- dsb();
+ dsb(sy);
isb();
}
static void hdlcd_flush(void)
{
- dsb();
+ dsb(sy);
}
static int __init get_color_masks(const char* bpp, struct color_masks **masks)
/* Flush local TLBs, current VMID only */
static inline void flush_tlb_local(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALL);
- dsb();
+ dsb(sy);
isb();
}
/* Flush inner shareable TLBs, current VMID only */
static inline void flush_tlb(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLIS);
- dsb();
+ dsb(sy);
isb();
}
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all_local(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLNSNH);
- dsb();
+ dsb(sy);
isb();
}
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
- dsb();
+ dsb(sy);
isb();
}
static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
{
unsigned long end = va + size;
- dsb(); /* Ensure preceding are visible */
+ dsb(sy); /* Ensure preceding are visible */
while ( va < end ) {
asm volatile(STORE_CP32(0, TLBIMVAH)
: : "r" (va) : "memory");
va += PAGE_SIZE;
}
- dsb(); /* Ensure completion of the TLB flush */
+ dsb(sy); /* Ensure completion of the TLB flush */
isb();
}
static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
{
unsigned long end = va + size;
- dsb(); /* Ensure preceding are visible */
+ dsb(sy); /* Ensure preceding are visible */
while ( va < end ) {
asm volatile("tlbi vae2, %0;"
: : "r" (va>>PAGE_SHIFT) : "memory");
va += PAGE_SIZE;
}
- dsb(); /* Ensure completion of the TLB flush */
+ dsb(sy); /* Ensure completion of the TLB flush */
isb();
}
static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
{
void *end;
- dsb(); /* So the CPU issues all writes to the range */
+ dsb(sy); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
- dsb(); /* So we know the flushes happen before continuing */
+ dsb(sy); /* So we know the flushes happen before continuing */
}
static inline void clean_and_invalidate_xen_dcache_va_range
(void *p, unsigned long size)
{
void *end;
- dsb(); /* So the CPU issues all writes to the range */
+ dsb(sy); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
- dsb(); /* So we know the flushes happen before continuing */
+ dsb(sy); /* So we know the flushes happen before continuing */
}
/* Macros for flushing a single small item. The predicate is always
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
-#define dmb() asm volatile("dmb sy" : : : "memory")
+#define dsb(scope) asm volatile("dsb " #scope : : : "memory")
+#define dmb(scope) asm volatile("dmb " #scope : : : "memory")
-#define mb() dsb()
-#define rmb() dsb()
-#define wmb() dsb()
+#define mb() dsb(sy)
+#define rmb() dsb(sy)
+#define wmb() dsb(sy)
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() dmb(sy)
+#define smp_rmb() dmb(sy)
+#define smp_wmb() dmb(sy)
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
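
As a sketch of what the scope parameter enables (a hypothetical follow-up, not part
of this patch), the SMP barriers could later be relaxed to the inner shareable
domain, which is all that is required for ordering between CPUs:

    /* Hypothetical follow-up only: order accesses within the inner
     * shareable domain rather than the full system. */
    #define smp_mb()        dmb(ish)
    #define smp_rmb()       dmb(ish)
    #define smp_wmb()       dmb(ish)
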